Fix writing to mmap'ed /dev/mem region mapped PROT_WRITE
author    kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 2 Sep 2005 16:51:55 +0000 (16:51 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 2 Sep 2005 16:51:55 +0000 (16:51 +0000)
Fix writing to mmap'ed /dev/mem region mapped PROT_WRITE
and MAP_PRIVATE. This is in fact a generic Linux bug.

Signed-off-by: Keir Fraser <keir@xensource.com>
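
For context, a minimal userspace reproducer sketch (editorial addition, not
part of the commit). It maps a page of /dev/mem with PROT_READ|PROT_WRITE and
MAP_PRIVATE, then writes to it; the write traps into do_wp_page() for
copy-on-write, and before this patch the !pfn_valid() case logged
"do_wp_page: bogus page" and returned VM_FAULT_OOM instead of performing the
copy. PHYS_OFF is an assumption: it must name a physical range with no
struct page backing (e.g. PCI MMIO above the end of RAM; check /proc/iomem),
and the program must run as root.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical offset; adjust to an unbacked range on your machine. */
    #define PHYS_OFF 0x40000000UL

    int main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            int fd = open("/dev/mem", O_RDWR);
            if (fd < 0) {
                    perror("open /dev/mem");
                    return 1;
            }

            /* MAP_PRIVATE: the first write must be satisfied by COW. */
            volatile unsigned char *p =
                    mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_PRIVATE,
                         fd, PHYS_OFF);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            p[0] = 0xff;    /* write fault -> do_wp_page() COW path */
            printf("write succeeded: %02x\n", p[0]);

            munmap((void *)p, psz);
            close(fd);
            return 0;
    }

With the patch applied, the store completes and the process continues with a
private copy of the page rather than being handed an out-of-memory failure.
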
linux-2.6-xen-sparse/mm/memory.c

index 27268188547ccb88f4ddadbcaa10c0ab10a25dc6..e5d776921e5f3d10036a0dbcb0e5d4ef9a65076f 100644
--- a/linux-2.6-xen-sparse/mm/memory.c
+++ b/linux-2.6-xen-sparse/mm/memory.c
@@ -1367,20 +1367,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
        struct page *old_page, *new_page;
        unsigned long pfn = pte_pfn(pte);
        pte_t entry;
+       struct page invalid_page;
 
        if (unlikely(!pfn_valid(pfn))) {
-               /*
-                * This should really halt the system so it can be debugged or
-                * at least the kernel stops what it's doing before it corrupts
-                * data, but for the moment just pretend this is OOM.
-                */
-               pte_unmap(page_table);
-               printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
-                               address);
-               spin_unlock(&mm->page_table_lock);
-               return VM_FAULT_OOM;
+               /* This can happen with /dev/mem (PROT_WRITE, MAP_PRIVATE). */
+               invalid_page.flags = (1<<PG_reserved) | (1<<PG_locked);
+               old_page = &invalid_page;
+       } else {
+               old_page = pfn_to_page(pfn);
        }
-       old_page = pfn_to_page(pfn);
 
        if (!TestSetPageLocked(old_page)) {
                int reuse = can_share_swap_page(old_page);
@@ -1416,7 +1411,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
                new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
                if (!new_page)
                        goto no_new_page;
-               copy_user_highpage(new_page, old_page, address);
+               if (old_page == &invalid_page) {
+                       char *vto = kmap_atomic(new_page, KM_USER1);
+                       copy_page(vto, (void *)(address & PAGE_MASK));
+                       kunmap_atomic(vto, KM_USER1);
+               } else {
+                       copy_user_highpage(new_page, old_page, address);
+               }
        }
        /*
         * Re-check the pte - we dropped the lock
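
Editorial note on the fix (not part of the commit message): when pfn_valid()
fails, old_page now points at an on-stack dummy struct page with PG_reserved
and PG_locked preset, so the TestSetPageLocked()/can_share_swap_page() reuse
check just below falls through harmlessly, since the page appears already
locked and reserved. And because the old frame has no struct page, and
therefore no kernel mapping for copy_user_highpage() to read from, the second
hunk fills the new page by copying one page through the still-present
read-only user mapping (address & PAGE_MASK) via kmap_atomic()/copy_page().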